from constants import *
from utils import evaluate_model_policy, plot_study, plot_fig
from trainer import get_trained_model
import optuna
from environment import StreetFighterEnv
from stable_baselines3 import PPO, A2C
from stable_baselines3.common.monitor import Monitor
from stable_baselines3.common.vec_env import DummyVecEnv, VecFrameStack
from actor_critic import A2CCNNPolicy
from feature_extractors import CNNExtractorWithAttention, CNNExtractor
from tuner import Tuner
import os
from layers import ActorCriticLayer
# --- Experiment 1: A2C, plain CNN extractor, movement capture OFF ------------
TIMESTEPS = 1   # timesteps per training trial (smoke-test value; raise for real runs)
N_TRIALS = 2    # number of Optuna trials (smoke-test value; raise for real runs)
PLOTLY_CONFIG = {"staticPlot": True}  # render Plotly figures as static images

model = A2C
model_dir = 'models/without_bias'
env = StreetFighterEnv(capture_movement=False)
policy_network = A2CCNNPolicy
policy_kwargs = dict(
    features_extractor_class=CNNExtractor,
    features_extractor_kwargs=dict(features_dim=512),
    actor_critic_class=ActorCriticLayer,
)

# Run the Optuna hyperparameter search (gamma / learning_rate / gae_lambda),
# saving trial models under `model_dir`.
tuner = Tuner(model=model, env=env, policy_network=policy_network,
              policy_args=policy_kwargs, timesteps=TIMESTEPS, save_dir=model_dir)
study = tuner.tune_study(n_trials=N_TRIALS)

# A bare expression only echoes in a notebook REPL; print so the best trial
# is also reported when this file runs as a script.
print(study.best_trial.number, study.best_params)
# Sample run output (recorded from a notebook session):
# [I 2022-04-18 02:58:46,143] A new study created in memory with name: no-name-ca7a6456-6d32-45c6-ade3-4e7d6f59e307
# [I 2022-04-18 02:59:54,262] Trial 0 finished with value: 0.0 and parameters: {'gamma': 0.8063723267679768, 'learning_rate': 1.1787123676611433e-05, 'gae_lambda': 0.9755434745506465}. Best is trial 0 with value: 0.0. [I 2022-04-18 03:00:36,981] Trial 1 finished with value: 2000.0 and parameters: {'gamma': 0.8100519856881473, 'learning_rate': 9.419068464057366e-05, 'gae_lambda': 0.9338440521307758}. Best is trial 1 with value: 2000.0.
# (1,
#  {'gamma': 0.8100519856881473,
#   'learning_rate': 9.419068464057366e-05,
#   'gae_lambda': 0.9338440521307758})
# Render the Optuna study visualizations (one Plotly figure per plot type)
# inline in the notebook, as static (non-interactive) images.
plots = plot_study(study)
for fig in plots:
    fig.show("notebook", config=PLOTLY_CONFIG)
# --- Experiment 2: A2C, plain CNN extractor, movement capture ON -------------
model = A2C
model_dir = 'models/without_bias_with_movement'
env = StreetFighterEnv(capture_movement=True)
policy_network = A2CCNNPolicy
policy_kwargs = dict(
    features_extractor_class=CNNExtractor,
    features_extractor_kwargs=dict(features_dim=512),
    actor_critic_class=ActorCriticLayer,
)

# Run the same Optuna search as experiment 1, this time with movement capture
# enabled in the environment; trial models are saved under `model_dir`.
tuner = Tuner(model=model, env=env, policy_network=policy_network,
              policy_args=policy_kwargs, timesteps=TIMESTEPS, save_dir=model_dir)
study = tuner.tune_study(n_trials=N_TRIALS)

# A bare expression only echoes in a notebook REPL; print so the best trial
# is also reported when this file runs as a script.
print(study.best_trial.number, study.best_params)
# Sample run output (recorded from a notebook session):
# [I 2022-04-18 03:00:37,348] A new study created in memory with name: no-name-efc732e2-1235-44f7-ac08-4d641d2cabfa
# [I 2022-04-18 03:01:21,525] Trial 0 finished with value: 0.0 and parameters: {'gamma': 0.8438188088692959, 'learning_rate': 2.757152843655376e-05, 'gae_lambda': 0.9463166059853156}. Best is trial 0 with value: 0.0. [I 2022-04-18 03:02:07,433] Trial 1 finished with value: 0.0 and parameters: {'gamma': 0.8666524888838817, 'learning_rate': 1.432920433303445e-05, 'gae_lambda': 0.8395785149499295}. Best is trial 0 with value: 0.0.
# (0,
#  {'gamma': 0.8438188088692959,
#   'learning_rate': 2.757152843655376e-05,
#   'gae_lambda': 0.9463166059853156})
# Render the Optuna study visualizations for experiment 2 inline in the
# notebook, as static (non-interactive) images.
plots = plot_study(study)
for fig in plots:
    fig.show("notebook", config=PLOTLY_CONFIG)